420951dcqyUCe_gXA_XJPu1ix_poKg xen/include/asm-x86/vmx_virpit.h
41c0c412lQ0NVVN9PsOSznQ-qhOiPA xen/include/asm-x86/vmx_vmcs.h
418fbcfe_WliJPToeVM-9VStvym-hw xen/include/asm-x86/x86_32/asm_defns.h
-3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-x86/x86_32/current.h
3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/asm-x86/x86_32/domain_page.h
4208e2a3ZNFroNXbX9OYaOB-xtUyDQ xen/include/asm-x86/x86_32/page.h
3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/x86_32/regs.h
3e7f358aG11EvMI9VJ4_9hD4LUO7rQ xen/include/asm-x86/x86_32/string.h
3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/x86_32/uaccess.h
41bf1717bML6GxpclTWJabiaO5W5vg xen/include/asm-x86/x86_64/asm_defns.h
-404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h
41febc4b1aCGLsm0Y0b_82h7lFtrEA xen/include/asm-x86/x86_64/domain_page.h
4208e2a3Fktw4ZttKdDxbhvTQ6brfQ xen/include/asm-x86/x86_64/page.h
404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/regs.h
t->ss0 = __HYPERVISOR_DS;
t->esp0 = get_stack_bottom();
#elif defined(CONFIG_X86_64)
+ /* Bottom-of-stack must be 16-byte aligned or CPU will force it! :-o */
+ BUG_ON((get_stack_bottom() & 15) != 0);
t->rsp0 = get_stack_bottom();
#endif
set_tss_desc(nr,t);
/* Must do this early -- e.g., spinlocks rely on get_current(). */
set_current(&idle0_exec_domain);
+ set_processor_id(0);
/* We initialise the serial devices very early so we can get debugging. */
serial_init_stage1();
extern void cpu_init(void);
set_current(idle_task[cpu]);
+ set_processor_id(cpu);
percpu_traps_init();
stack = (void *)alloc_xenheap_pages(STACK_ORDER);
#if defined(__i386__)
- stack_start.esp = __pa(stack) + STACK_SIZE - STACK_RESERVED;
+ stack_start.esp = __pa(stack);
#elif defined(__x86_64__)
- stack_start.esp = (unsigned long)stack + STACK_SIZE - STACK_RESERVED;
+ stack_start.esp = (unsigned long)stack;
#endif
+ stack_start.esp += STACK_SIZE - sizeof(struct cpu_info);
/* Debug build: detect stack overflow by setting up a guard page. */
memguard_guard_stack(stack);
+/******************************************************************************
+ * current.h
+ *
+ * Information structure that lives at the bottom of the per-cpu Xen stack.
+ */
-#ifdef __x86_64__
-#include <asm/x86_64/current.h>
-#else
-#include <asm/x86_32/current.h>
-#endif
+#ifndef __X86_CURRENT_H__
+#define __X86_CURRENT_H__
+
+#include <xen/config.h>
+#include <public/xen.h>
+#include <asm/page.h>
+
+struct exec_domain;
+
+/*
+ * Per-CPU state kept at the top of each STACK_SIZE-aligned Xen stack.
+ * NOTE(review): get_stack_bottom() below takes the address of
+ * guest_cpu_user_regs.es, so the field layout of cpu_user_regs (and this
+ * struct) is ABI-sensitive -- do not reorder fields casually.
+ */
+struct cpu_info {
+ struct cpu_user_regs guest_cpu_user_regs;
+ unsigned int processor_id;
+ struct exec_domain *current_ed;
+};
+
+/*
+ * Locate this CPU's cpu_info by masking the stack pointer down to the
+ * STACK_SIZE-aligned stack base (via the "and") and then offsetting to
+ * the cpu_info block at the top of the stack (via the "or").  The "or"
+ * is valid because the base is STACK_SIZE-aligned, so the offset bits
+ * are guaranteed clear.  __OP expands to the word-size register prefix
+ * (empty on x86-32, "r" on x86-64), presumably defined in asm_defns.h
+ * -- confirm against that header.
+ */
+static inline struct cpu_info *get_cpu_info(void)
+{
+ struct cpu_info *cpu_info;
+ __asm__ ( "and %%"__OP"sp,%0; or %2,%0"
+ : "=r" (cpu_info)
+ : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-sizeof(struct cpu_info))
+ );
+ return cpu_info;
+}
+
+/* Accessors for the per-CPU fields; all derive from the stack pointer. */
+#define get_current() (get_cpu_info()->current_ed)
+#define set_current(_ed) (get_cpu_info()->current_ed = (_ed))
+#define current (get_current())
+
+#define get_processor_id() (get_cpu_info()->processor_id)
+#define set_processor_id(_id) (get_cpu_info()->processor_id = (_id))
+
+#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
+
+/*
+ * Get the bottom-of-stack, as stored in the per-CPU TSS. This actually points
+ * into the middle of cpu_info.guest_cpu_user_regs, at the section that
+ * precisely corresponds to a CPU trap frame.
+ */
+#define get_stack_bottom() \
+ ((unsigned long)&get_cpu_info()->guest_cpu_user_regs.es)
+
+/*
+ * Discard the current stack and restart at __fn with %sp pointing at the
+ * guest register frame.  __fn must never return.
+ */
+#define reset_stack_and_jump(__fn) \
+ __asm__ __volatile__ ( \
+ "mov %0,%%"__OP"sp; jmp "STR(__fn) \
+ : : "r" (guest_cpu_user_regs()) )
+
+#define schedule_tail(_ed) (((_ed)->arch.schedule_tail)(_ed))
+
+#endif /* __X86_CURRENT_H__ */
#include <xen/config.h>
#include <xen/kernel.h>
#include <xen/cpumask.h>
+#include <asm/current.h>
#endif
#ifdef CONFIG_X86_LOCAL_APIC
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
-#define __smp_processor_id() (current->processor)
+#define __smp_processor_id() (get_processor_id())
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_callin_map;
+++ /dev/null
-
-#ifndef _X86_CURRENT_H
-#define _X86_CURRENT_H
-
-struct domain;
-
-#define STACK_RESERVED \
- (sizeof(struct cpu_user_regs) + sizeof(struct domain *))
-
-static inline struct exec_domain *get_current(void)
-{
- struct exec_domain *ed;
- __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0"
- : "=r" (ed) : "0" (STACK_SIZE-4) );
- return ed;
-}
-
-#define current get_current()
-
-static inline void set_current(struct exec_domain *ed)
-{
- __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)"
- : : "r" (STACK_SIZE-4), "r" (ed) );
-}
-
-static inline struct cpu_user_regs *guest_cpu_user_regs(void)
-{
- struct cpu_user_regs *cpu_user_regs;
- __asm__ ( "andl %%esp,%0; addl %2,%0"
- : "=r" (cpu_user_regs)
- : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
- return cpu_user_regs;
-}
-
-/*
- * Get the bottom-of-stack, as stored in the per-CPU TSS. This is actually
- * 20 bytes before the real bottom of the stack to allow space for:
- * domain pointer, DS, ES, FS, GS.
- */
-static inline unsigned long get_stack_bottom(void)
-{
- unsigned long p;
- __asm__ ( "andl %%esp,%0; addl %2,%0"
- : "=r" (p)
- : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-20) );
- return p;
-}
-
-#define reset_stack_and_jump(__fn) \
- __asm__ __volatile__ ( \
- "movl %0,%%esp; jmp "STR(__fn) \
- : : "r" (guest_cpu_user_regs()) )
-
-#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
-
-#endif /* _X86_CURRENT_H */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-
-#ifndef _X86_64_CURRENT_H
-#define _X86_64_CURRENT_H
-
-struct domain;
-
-#define STACK_RESERVED \
- (sizeof(struct cpu_user_regs) + sizeof(struct domain *) + 8)
-
-static inline struct exec_domain *get_current(void)
-{
- struct exec_domain *ed;
- __asm__ ( "orq %%rsp,%0; andq $~7,%0; movq (%0),%0"
- : "=r" (ed) : "0" (STACK_SIZE-8) );
- return ed;
-}
-
-#define current get_current()
-
-static inline void set_current(struct exec_domain *ed)
-{
- __asm__ ( "orq %%rsp,%0; andq $~7,%0; movq %1,(%0)"
- : : "r" (STACK_SIZE-8), "r" (ed) );
-}
-
-static inline struct cpu_user_regs *guest_cpu_user_regs(void)
-{
- struct cpu_user_regs *cpu_user_regs;
- __asm__( "andq %%rsp,%0; addq %2,%0"
- : "=r" (cpu_user_regs)
- : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
- return cpu_user_regs;
-}
-
-/*
- * Get the bottom-of-stack, as stored in the per-CPU TSS. This is actually
- * 48 bytes before the real bottom of the stack to allow space for:
- * domain pointer, padding, DS, ES, FS, GS. The padding is required to
- * have the stack pointer 16-byte aligned: the amount we subtract from
- * STACK_SIZE *must* be a multiple of 16.
- */
-static inline unsigned long get_stack_bottom(void)
-{
- unsigned long p;
- __asm__( "andq %%rsp,%0; addq %2,%0"
- : "=r" (p)
- : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-48) );
- return p;
-}
-
-#define reset_stack_and_jump(__fn) \
- __asm__ __volatile__ ( \
- "movq %0,%%rsp; jmp "STR(__fn) \
- : : "r" (guest_cpu_user_regs()) )
-
-#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
-
-#endif /* !(_X86_64_CURRENT_H) */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */